In [2]:
import matplotlib.pyplot as plt
import cv2
from skimage.feature import hog
from skimage import data, exposure
import numpy as np
from PIL import Image
from google.colab.patches import cv2_imshow
In [ ]:
# Harris Corner Detection

img = cv2.imread('/content/chessboard.png')
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)

gray = np.float32(gray)
dst = cv2.cornerHarris(gray,2,3,0.04)
dst = cv2.dilate(dst,None)
img[dst>0.01*dst.max()]=[0,0,255]

#cv2.imshow('dst',img)
plt.imshow(dst),plt.show()
Out[ ]:
(<matplotlib.image.AxesImage at 0x7f8b7ca6c0f0>, None)
In [ ]:
#FOR SIFT (Since it has patent)

!pip install opencv-python==3.3.0.10 opencv-contrib-python==3.3.0.10
In [ ]:
#SIFT

img= cv2.imread('/content/sample.jpg')
gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
sift=cv2.xfeatures2d.SIFT_create()
kp=sift.detect(gray,None)
print(kp)

img=cv2.drawKeypoints(gray,kp,img)
cv2.imwrite('/content/abc.jpg',img)

img=cv2.drawKeypoints(gray,kp,img,flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
cv2.imwrite('/content/abc.jpg',img)

sift=cv2.xfeatures2d.SIFT_create
sift_img= cv2.imread('/content/abc.jpg')
plt.imshow(sift_img),plt.show()
[<KeyPoint 0x7f503454d450>, <KeyPoint 0x7f503454d4e0>, <KeyPoint 0x7f503454d420>, <KeyPoint 0x7f503454d390>, <KeyPoint 0x7f503454d3f0>, <KeyPoint 0x7f503454d360>, <KeyPoint 0x7f503454d4b0>, <KeyPoint 0x7f503454d480>, <KeyPoint 0x7f503454d330>, <KeyPoint 0x7f503454d510>, <KeyPoint 0x7f503454d540>, <KeyPoint 0x7f503454d570>, <KeyPoint 0x7f503454d5a0>, <KeyPoint 0x7f503454d5d0>, <KeyPoint 0x7f503454d600>, <KeyPoint 0x7f503454d630>, <KeyPoint 0x7f503454d660>, <KeyPoint 0x7f503454d690>, <KeyPoint 0x7f503454d6c0>, <KeyPoint 0x7f503454d6f0>, <KeyPoint 0x7f503454d720>, <KeyPoint 0x7f503454d750>, <KeyPoint 0x7f503454d780>, <KeyPoint 0x7f503454d7b0>, <KeyPoint 0x7f503454d7e0>, <KeyPoint 0x7f503454d810>, <KeyPoint 0x7f503454d840>, <KeyPoint 0x7f503454d870>, <KeyPoint 0x7f503454d8a0>, <KeyPoint 0x7f503454d8d0>, <KeyPoint 0x7f503454d900>, <KeyPoint 0x7f503454d930>, <KeyPoint 0x7f503454d960>, <KeyPoint 0x7f503454d990>, <KeyPoint 0x7f503454d9c0>, <KeyPoint 0x7f503454d9f0>, <KeyPoint 0x7f503454da20>, <KeyPoint 0x7f503454da50>, <KeyPoint 0x7f503454da80>, <KeyPoint 0x7f503454dab0>]
Out[ ]:
(<matplotlib.image.AxesImage at 0x7f50325485f8>, None)
In [ ]:
#SURF

img= cv2.imread('/content/sample0.jpg')
surf=cv2.xfeatures2d.SURF_create()
kp, des = surf.detectAndCompute(img,None)
print(des)

img2 = cv2.drawKeypoints(img,kp,None,(255,0,0),4)
plt.imshow(img2),plt.show()
[[ 1.9852596e-03 -2.1491705e-03  6.0914378e-03 ...  4.9098273e-04
   5.1858427e-04  4.9098273e-04]
 [ 1.9773915e-03 -2.0619112e-03  5.6209951e-03 ...  1.2068795e-03
   1.4863303e-03  1.2068795e-03]
 [ 0.0000000e+00  0.0000000e+00  0.0000000e+00 ...  0.0000000e+00
   0.0000000e+00  0.0000000e+00]
 ...
 [-1.5410774e-03  1.3494986e-03  1.5410774e-03 ... -1.8530802e-03
   6.2489384e-03  1.8924858e-03]
 [-1.0814707e-03  1.6620645e-04  1.0904876e-03 ...  5.9490843e-04
   2.3132307e-03  6.3755369e-04]
 [ 0.0000000e+00  0.0000000e+00  0.0000000e+00 ...  5.5970188e-05
   2.1827429e-04  6.6174063e-05]]
Out[ ]:
(<matplotlib.image.AxesImage at 0x7fdc3a9d6160>, None)
In [ ]:
# Histogram of gradient

image = cv2.imread('/content/abc.jpg')
fd, hog_image = hog(image, orientations=8, pixels_per_cell=(16, 16),
                    cells_per_block=(1, 1), visualize=True, multichannel=True)

fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 10), sharex=True, sharey=True)

ax1.axis('off')
ax1.imshow(image, cmap=plt.cm.gray)
ax1.set_title('Input image')


hog_image_rescaled = exposure.rescale_intensity(hog_image, in_range=(0, 10))

ax2.axis('off')
ax2.imshow(hog_image_rescaled, cmap=plt.cm.gray)
ax2.set_title('Histogram of Oriented Gradients')
plt.show()
win='hog image'
In [ ]:
import pickle
import os
import scipy
import scipy.spatial
In [ ]:
# Feature extractor
def extract_features(image_path, vector_size=32):
    image = cv2.imread(image_path)
    try:
        alg = cv2.xfeatures2d.SIFT_create()
        kps = alg.detect(image)
        kps = sorted(kps, key=lambda x: -x.response)[:vector_size]
        
        kps, dsc = alg.compute(image, kps)

        dsc = dsc.flatten()

        needed_size = (vector_size * 64)
        if dsc.size < needed_size:

            dsc = np.concatenate([dsc, np.zeros(needed_size - dsc.size)])
    except cv2.error as e:
        print ('Error: ', e)
        return None

    return dsc


def batch_extractor(images_path, pickled_db_path="features.pickle"):
    files = [os.path.join(images_path, p) for p in sorted(os.listdir(images_path))]

    result = {}
    for f in files:
        print ('Extracting features from image %s' % f)
        name = f.split('/')[-1].lower()
        result[name] = extract_features(f)
        #print(result[name])
    
    # saving all our feature vectors in pickled file
    with open(pickled_db_path, 'wb') as fp:
        pickle.dump(result, fp)
        
        
class Matcher(object):

    def __init__(self, pickled_db_path="features.pickle"):
        with open(pickled_db_path,'rb') as fp:
            self.data = pickle.load(fp)
        self.names = []
        self.matrix = []
        for k, v in self.data.items():
            self.names.append(k)
            self.matrix.append(v)
        self.matrix = np.array(self.matrix)
        self.names = np.array(self.names)

    def cos_cdist(self, vector):

        v = vector.reshape(1, -1)
        return scipy.spatial.distance.cdist(self.matrix, v, 'cosine').reshape(-1)

    def match(self, image_path, topn=5):
        features = extract_features(image_path)
        img_distances = self.cos_cdist(features)
        # getting top 5 records
        nearest_ids = np.argsort(img_distances)[:topn].tolist()
        nearest_img_paths = self.names[nearest_ids].tolist()

        return nearest_img_paths, img_distances[nearest_ids].tolist()
    

    
def show_img(path):
    img = cv2.imread(path)
    plt.imshow(img)
    plt.show()
    
def run():
    images_path = '/content/sample_data/images'
    files = [os.path.join(images_path, p) for p in sorted(os.listdir(images_path))]
    
    sample=files
    batch_extractor(images_path)
    ma = Matcher('features.pickle')
    
    for s in sample:
        print ('Query image ================')
        show_img(s)
        names, match = ma.match(s, topn=3)
        print ('Result images ===============')
        for i in range(3):

            print ('Match %s' % (1-match[i]))
            show_img(os.path.join(images_path, names[i]))

run()
Extracting features from image /content/sample_data/images/10815824_2997e03d76.jpg
Extracting features from image /content/sample_data/images/12830823_87d2654e31.jpg
Extracting features from image /content/sample_data/images/17273391_55cfc7d3d4.jpg
Extracting features from image /content/sample_data/images/19212715_20476497a3.jpg
Extracting features from image /content/sample_data/images/23445819_3a458716c1.jpg
Extracting features from image /content/sample_data/images/3637013_c675de7705.jpg
Extracting features from image /content/sample_data/images/667626_18933d713e.jpg
Query image ================
Result images ===============
Match 0.9999999999999998
Match 0.5111395614138058
Match 0.48988646539634717
Query image ================
Result images ===============
Match 1.0
Match 0.4833208161268936
Match 0.4427954777630885
Query image ================
Result images ===============
Match 1.0
Match 0.48751712434190364
Match 0.4745306198266199
Query image ================
Result images ===============
Match 1.0
Match 0.48942413621229963
Match 0.4698410669091656
Query image ================
Result images ===============
Match 1.0
Match 0.48988646539634717
Match 0.48714890041083536
Query image ================
Result images ===============
Match 1.0
Match 0.5111395614138058
Match 0.48751712434190364
Query image ================
Result images ===============
Match 1.0
Match 0.48942413621229963
Match 0.47772229014392265
In [ ]:
# Feature extractor
def extract_features(image_path, vector_size=32):
    image = cv2.imread(image_path)
    try:
        alg = cv2.xfeatures2d.SURF_create()
        kps = alg.detect(image)
        kps = sorted(kps, key=lambda x: -x.response)[:vector_size]
        
        kps, dsc = alg.compute(image, kps)

        dsc = dsc.flatten()

        needed_size = (vector_size * 64)
        if dsc.size < needed_size:

            dsc = np.concatenate([dsc, np.zeros(needed_size - dsc.size)])
    except cv2.error as e:
        print ('Error: ', e)
        return None

    return dsc


def batch_extractor(images_path, pickled_db_path="features.pickle"):
    files = [os.path.join(images_path, p) for p in sorted(os.listdir(images_path))]

    result = {}
    for f in files:
        print ('Extracting features from image %s' % f)
        name = f.split('/')[-1].lower()
        result[name] = extract_features(f)
        #print(result[name])
    
    # saving all our feature vectors in pickled file
    with open(pickled_db_path, 'wb') as fp:
        pickle.dump(result, fp)
        
        
class Matcher(object):

    def __init__(self, pickled_db_path="features.pickle"):
        with open(pickled_db_path,'rb') as fp:
            self.data = pickle.load(fp)
        self.names = []
        self.matrix = []
        for k, v in self.data.items():
            self.names.append(k)
            self.matrix.append(v)
        self.matrix = np.array(self.matrix)
        self.names = np.array(self.names)

    def cos_cdist(self, vector):

        v = vector.reshape(1, -1)
        return scipy.spatial.distance.cdist(self.matrix, v, 'cosine').reshape(-1)

    def match(self, image_path, topn=5):
        features = extract_features(image_path)
        img_distances = self.cos_cdist(features)
        # getting top 5 records
        nearest_ids = np.argsort(img_distances)[:topn].tolist()
        nearest_img_paths = self.names[nearest_ids].tolist()

        return nearest_img_paths, img_distances[nearest_ids].tolist()
    

    
def show_img(path):
    img = cv2.imread(path)
    plt.imshow(img)
    plt.show()
    
def run():
    images_path = '/content/sample_data/images/'
    files = [os.path.join(images_path, p) for p in sorted(os.listdir(images_path))]
    
    sample=files
    batch_extractor(images_path)
    ma = Matcher('features.pickle')
    
    for s in sample:
        print ('Query image ================')
        show_img(s)
        names, match = ma.match(s, topn=3)
        print ('Result images ===============')
        for i in range(3):

            print ('Match %s' % (1-match[i]))
            show_img(os.path.join(images_path, names[i]))

run()
Extracting features from image /content/sample_data/images/10815824_2997e03d76.jpg
Extracting features from image /content/sample_data/images/12830823_87d2654e31.jpg
Extracting features from image /content/sample_data/images/17273391_55cfc7d3d4.jpg
Extracting features from image /content/sample_data/images/19212715_20476497a3.jpg
Extracting features from image /content/sample_data/images/23445819_3a458716c1.jpg
Extracting features from image /content/sample_data/images/3637013_c675de7705.jpg
Extracting features from image /content/sample_data/images/667626_18933d713e.jpg
Query image ================
Result images ===============
Match 0.9999999999999999
Match 0.6801788069522481
Match 0.6715966383429789
Query image ================
Result images ===============
Match 1.0
Match 0.7223218118889655
Match 0.7126380393235985
Query image ================
Result images ===============
Match 1.0
Match 0.685736246559786
Match 0.6634554716138431
Query image ================
Result images ===============
Match 1.0
Match 0.6683015057074917
Match 0.6640859041412119
Query image ================
Result images ===============
Match 1.0
Match 0.7126380393235985
Match 0.7045288553605836
Query image ================
Result images ===============
Match 1.0
Match 0.7223218118889655
Match 0.7045288553605836
Query image ================
Result images ===============
Match 1.0
Match 0.6715966383429789
Match 0.6683015057074917